/******************************************************************************
* crash.c
- *
+ *
* Based heavily on arch/i386/kernel/crash.c from Linux 2.6.16
*
* Xen port written by:
static int crash_nmi_callback(struct cpu_user_regs *regs, int cpu)
{
- /* Don't do anything if this handler is invoked on crashing cpu.
- * Otherwise, system will completely hang. Crashing cpu can get
- * an NMI if system was initially booted with nmi_watchdog parameter.
- */
- if (cpu == crashing_cpu)
- return 1;
- local_irq_disable();
+ /* Don't do anything if this handler is invoked on crashing cpu.
+ * Otherwise, system will completely hang. Crashing cpu can get
+ * an NMI if system was initially booted with nmi_watchdog parameter.
+ */
+ if ( cpu == crashing_cpu )
+ return 1;
+ local_irq_disable();
machine_crash_save_cpu();
- disable_local_APIC();
- atomic_dec(&waiting_for_crash_ipi);
- hvm_disable();
+ disable_local_APIC();
+ atomic_dec(&waiting_for_crash_ipi);
+ hvm_disable();
for ( ; ; )
__asm__ __volatile__ ( "hlt" );
- return 1;
+ return 1;
}
/*
{
cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpu_clear(smp_processor_id(), allbutself);
send_IPI_mask(allbutself, APIC_DM_NMI);
}
static void nmi_shootdown_cpus(void)
{
- unsigned long msecs;
-
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- /* Would it be better to replace the trap vector here? */
- set_nmi_callback(crash_nmi_callback);
- /* Ensure the new callback function is set before sending
- * out the NMI
- */
- wmb();
-
- smp_send_nmi_allbutself();
-
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
- mdelay(1);
- msecs--;
- }
-
- /* Leave the nmi callback set */
+ unsigned long msecs;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ /* Would it be better to replace the trap vector here? */
+ set_nmi_callback(crash_nmi_callback);
+ /* Ensure the new callback function is set before sending
+ * out the NMI
+ */
+ wmb();
+
+ smp_send_nmi_allbutself();
+
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ( (atomic_read(&waiting_for_crash_ipi) > 0) && msecs )
+ {
+ mdelay(1);
+ msecs--;
+ }
+
+ /* Leave the nmi callback set */
disable_local_APIC();
}
#endif
void machine_crash_shutdown(void)
{
- printk("machine_crash_shutdown: %d\n", smp_processor_id());
- local_irq_disable();
+ printk("machine_crash_shutdown: %d\n", smp_processor_id());
+ local_irq_disable();
#ifdef CONFIG_SMP
- nmi_shootdown_cpus();
+ nmi_shootdown_cpus();
#endif
#ifdef CONFIG_X86_IO_APIC
* indent-tabs-mode: nil
* End:
*/
-
/******************************************************************************
* machine_kexec.c
- *
+ *
* Xen port written by:
* - Simon 'Horms' Horman <horms@verge.net.au>
* - Magnus Damm <magnus@valinux.co.jp>
* in every odd index in page_list[].
*/
- for (k = 0; k < KEXEC_XEN_NO_PAGES; k++) {
- if ((k & 1) == 0) { /* even pages: machine address */
+ for ( k = 0; k < KEXEC_XEN_NO_PAGES; k++ )
+ {
+ if ( (k & 1) == 0 )
+ {
+ /* Even pages: machine address. */
prev_ma = image->page_list[k];
}
- else { /* odd pages: va for previous ma */
+ else
+ {
+ /* Odd pages: va for previous ma. */
set_fixmap(fix_base + (k >> 1), prev_ma);
image->page_list[k] = fix_to_virt(fix_base + (k >> 1));
}
}
- return 0;
+ return 0;
}
void machine_kexec_unload(int type, int slot, xen_kexec_image_t *image)
{
}
-
+
static void __machine_shutdown(void *data)
{
xen_kexec_image_t *image = (xen_kexec_image_t *)data;
machine_kexec(image);
}
-
+
void machine_shutdown(xen_kexec_image_t *image)
{
int reboot_cpu_id;
reboot_cpu_id = 0;
- if (!cpu_isset(reboot_cpu_id, cpu_online_map))
+ if ( !cpu_isset(reboot_cpu_id, cpu_online_map) )
reboot_cpu_id = smp_processor_id();
-
- if (reboot_cpu_id != smp_processor_id()) {
+
+ if ( reboot_cpu_id != smp_processor_id() )
+ {
cpus_clear(reboot_cpu);
cpu_set(reboot_cpu_id, reboot_cpu);
on_selected_cpus(reboot_cpu, __machine_shutdown, image, 1, 0);
; /* nothing */
}
else
+ {
__machine_shutdown(image);
+ }
BUG();
}
printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}
-void __init move_memory(unsigned long dst,
+void __init move_memory(unsigned long dst,
unsigned long src_start, unsigned long src_end)
{
#if defined(CONFIG_X86_32)
- memmove((void *)dst, /* use low mapping */
+ memmove((void *)dst, /* use low mapping */
(void *)src_start, /* use low mapping */
src_end - src_start);
#elif defined(CONFIG_X86_64)
}
machine_kexec_reserved(&crash_area);
- if (crash_area.size > 0) {
+ if ( crash_area.size > 0 )
+ {
unsigned long kdump_start, kdump_size, k;
- /* mark images pages as free for now */
+ /* Mark the image's pages as free for now. */
init_boot_pages(initial_images_start, initial_images_end);
kdump_start = crash_area.start;
kdump_size = crash_area.size;
- printk("Kdump: %luMB (%lukB) at 0x%lx\n",
+ printk("Kdump: %luMB (%lukB) at 0x%lx\n",
kdump_size >> 20,
kdump_size >> 10,
kdump_start);
- if ((kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK))
+ if ( (kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK) )
panic("Kdump parameters not page aligned\n");
kdump_start >>= PAGE_SHIFT;
k = alloc_boot_pages_at(kdump_size, kdump_start);
- if (k != kdump_start)
+ if ( k != kdump_start )
panic("Unable to reserve Kdump memory\n");
/* allocate pages for relocated initial images */
k = alloc_boot_pages(k, 1);
- if (!k)
+ if ( !k )
panic("Unable to allocate initial images memory\n");
move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);
initial_images_end -= initial_images_start;
initial_images_start = k << PAGE_SHIFT;
initial_images_end += initial_images_start;
- }
+ }
memguard_init();
percpu_guard_areas();
- printk("System RAM: %luMB (%lukB)\n",
+ printk("System RAM: %luMB (%lukB)\n",
nr_pages >> (20 - PAGE_SHIFT),
nr_pages << (PAGE_SHIFT - 10));
total_pages = nr_pages;
/******************************************************************************
* kexec.c - Achitecture independent kexec code for Xen
- *
+ *
* Xen port written by:
* - Simon 'Horms' Horman <horms@verge.net.au>
* - Magnus Damm <magnus@valinux.co.jp>
static void one_cpu_only(void)
{
/* Only allow the first cpu to continue - force other cpus to spin */
- if (test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags))
+ if ( test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
{
while (1);
}
int cpu = smp_processor_id();
crash_note_t *cntp;
- if (!cpu_test_and_set(cpu, crash_saved_cpus))
+ if ( !cpu_test_and_set(cpu, crash_saved_cpus) )
{
cntp = &per_cpu(crash_notes, cpu);
elf_core_save_regs(&cntp->core.desc.desc.pr_reg,
setup_crash_note(cntp, core, CORE_STR, CORE_STR_LEN, NT_PRSTATUS);
/* setup crash note "Xen", XEN_ELFNOTE_CRASH_REGS */
- setup_crash_note(cntp, xen_regs, XEN_STR, XEN_STR_LEN,
+ setup_crash_note(cntp, xen_regs, XEN_STR, XEN_STR_LEN,
XEN_ELFNOTE_CRASH_REGS);
}
}
cntp = &per_cpu(crash_notes, cpu);
/* setup crash note "Xen", XEN_ELFNOTE_CRASH_INFO */
- setup_crash_note(cntp, xen_info, XEN_STR, XEN_STR_LEN,
+ setup_crash_note(cntp, xen_info, XEN_STR, XEN_STR_LEN,
XEN_ELFNOTE_CRASH_INFO);
info = &cntp->xen_info.desc.desc;
xen_kexec_image_t *image;
one_cpu_only();
-
+
machine_crash_save_cpu();
crashing_cpu = smp_processor_id();
pos = (test_bit(KEXEC_FLAG_CRASH_POS, &kexec_flags) != 0);
- if (test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags))
+ if ( test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
{
image = &kexec_image[KEXEC_IMAGE_CRASH_BASE + pos];
machine_kexec(image); /* Does not return */
{
unsigned long val[2];
char *str = opt_crashkernel;
- int k = 0;
+ int k = 0;
memset(reservation, 0, sizeof(*reservation));
static int kexec_get_reserve(xen_kexec_range_t *range)
{
xen_kexec_reserve_t reservation;
-
+
machine_kexec_reserved(&reservation);
range->start = reservation.start;
static int kexec_get_xen(xen_kexec_range_t *range, int get_ma)
{
- if (get_ma)
+ if ( get_ma )
range->start = virt_to_maddr(&_text);
else
range->start = (unsigned long) &_text;
static int kexec_get_cpu(xen_kexec_range_t *range)
{
- if (range->nr < 0 || range->nr >= num_present_cpus())
+ if ( range->nr < 0 || range->nr >= num_present_cpus() )
return -EINVAL;
range->start = __pa((unsigned long)&per_cpu(crash_notes, range->nr));
{
xen_kexec_range_t range;
int ret = -EINVAL;
-
- if (unlikely(copy_from_guest(&range, uarg, 1)))
+
+ if ( unlikely(copy_from_guest(&range, uarg, 1)) )
return -EFAULT;
- switch (range.range)
+ switch ( range.range )
{
case KEXEC_RANGE_MA_CRASH:
ret = kexec_get_reserve(&range);
break;
}
- if (ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)))
+ if ( ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)) )
return -EFAULT;
-
+
return ret;
}
static int kexec_load_get_bits(int type, int *base, int *bit)
{
- switch (type)
+ switch ( type )
{
case KEXEC_TYPE_DEFAULT:
*base = KEXEC_IMAGE_DEFAULT_BASE;
int base, bit, pos;
int ret = 0;
- if (unlikely(copy_from_guest(&load, uarg, 1)))
+ if ( unlikely(copy_from_guest(&load, uarg, 1)) )
return -EFAULT;
- if (kexec_load_get_bits(load.type, &base, &bit))
+ if ( kexec_load_get_bits(load.type, &base, &bit) )
return -EINVAL;
pos = (test_bit(bit, &kexec_flags) != 0);
/* Load the user data into an unused image */
- if (op == KEXEC_CMD_kexec_load)
+ if ( op == KEXEC_CMD_kexec_load )
{
image = &kexec_image[base + !pos];
BUG_ON(test_bit((base + !pos), &kexec_flags)); /* must be free */
memcpy(image, &load.image, sizeof(*image));
-
- if (!(ret = machine_kexec_load(load.type, base + !pos, image)))
+
+ if ( !(ret = machine_kexec_load(load.type, base + !pos, image)) )
{
/* Set image present bit */
set_bit((base + !pos), &kexec_flags);
}
/* Unload the old image if present and load successful */
- if (ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags))
+ if ( ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
{
- if (test_and_clear_bit((base + pos), &kexec_flags))
+ if ( test_and_clear_bit((base + pos), &kexec_flags) )
{
image = &kexec_image[base + pos];
machine_kexec_unload(load.type, base + pos, image);
xen_kexec_image_t *image;
int base, bit, pos;
- if (unlikely(copy_from_guest(&exec, uarg, 1)))
+ if ( unlikely(copy_from_guest(&exec, uarg, 1)) )
return -EFAULT;
- if (kexec_load_get_bits(exec.type, &base, &bit))
+ if ( kexec_load_get_bits(exec.type, &base, &bit) )
return -EINVAL;
pos = (test_bit(bit, &kexec_flags) != 0);
/* Only allow kexec/kdump into loaded images */
- if (!test_bit(base + pos, &kexec_flags))
+ if ( !test_bit(base + pos, &kexec_flags) )
return -ENOENT;
switch (exec.type)
unsigned long flags;
int ret = -EINVAL;
- if ( !IS_PRIV(current->domain) )
+ if ( !IS_PRIV(current->domain) )
return -EPERM;
- switch (op)
+ switch ( op )
{
case KEXEC_CMD_kexec_get_range:
ret = kexec_get_range(uarg);